When handling an fpswa fault, Xen needs to fetch the opcode, and that fetch may fail.
This patch completes the retry mechanism for that case.
Signed-off-by: Anthony Xu <anthony.xu@intel.com>
void vmx_reflect_interruption(UINT64 ifa,UINT64 isr,UINT64 iim,
UINT64 vector,REGS *regs)
{
+ UINT64 status;
VCPU *vcpu = current;
UINT64 vpsr = VCPU(vcpu, vpsr);
vector=vec2off[vector];
}
else{ // handle fpswa emulation
// fp fault
- if(vector == IA64_FP_FAULT_VECTOR && !handle_fpu_swa(1, regs, isr)){
- vmx_vcpu_increment_iip(vcpu);
- return;
+ if (vector == IA64_FP_FAULT_VECTOR) {
+ status = handle_fpu_swa(1, regs, isr);
+ if (!status) {
+ vmx_vcpu_increment_iip(vcpu);
+ return;
+ } else if (IA64_RETRY == status)
+ return;
}
//fp trap
- else if(vector == IA64_FP_TRAP_VECTOR && !handle_fpu_swa(0, regs, isr)){
- return;
+ else if (vector == IA64_FP_TRAP_VECTOR) {
+ status = handle_fpu_swa(0, regs, isr);
+ if (!status)
+ return;
+ else if (IA64_RETRY == status) {
+ vmx_vcpu_decrement_iip(vcpu);
+ return;
+ }
}
}
VCPU(vcpu,isr)=isr;
}
+/*
+ * Step the guest's instruction pointer back by one slot so the
+ * faulting instruction is re-executed (used for the IA64_RETRY
+ * path after an fp trap).  An ia64 bundle is 16 bytes and holds
+ * three slots; psr.ri is the slot index (0..2) within the bundle.
+ */
+IA64FAULT vmx_vcpu_decrement_iip(VCPU *vcpu)
+{
+    REGS *regs = vcpu_regs(vcpu);
+    IA64_PSR *ipsr = (IA64_PSR *)&regs->cr_ipsr;
+
+    if (ipsr->ri == 0) {
+        /* Already at slot 0: wrap to slot 2 of the previous bundle. */
+        ipsr->ri = 2;
+        regs->cr_iip -= 16;
+    } else {
+        ipsr->ri--;
+    }
+    return (IA64_NO_FAULT);
+}
+
+
IA64FAULT vmx_vcpu_cover(VCPU *vcpu)
{
REGS *regs = vcpu_regs(vcpu);
#define IA64_NO_FAULT 0x0000
#define IA64_FAULT 0x0001
#define IA64_RFI_IN_PROGRESS 0x0002
-#define IA64_RETRY 0x0003
+// IA64_RETRY is made negative (-0x000f) so it cannot collide with
+// the status values returned by handle_fpu_swa()
+#define IA64_RETRY (-0x000f)
#define IA64_FORCED_IFA 0x0004
#define IA64_USE_TLB 0x0005
#define IA64_ILLOP_FAULT (IA64_GENEX_VECTOR | 0x00)
extern void memwrite_p(VCPU *vcpu, u64 *src, u64 *dest, size_t s);
extern void vcpu_load_kernel_regs(VCPU *vcpu);
extern IA64FAULT vmx_vcpu_increment_iip(VCPU *vcpu);
+extern IA64FAULT vmx_vcpu_decrement_iip(VCPU *vcpu);
extern void vmx_switch_rr7(unsigned long ,shared_info_t*,void *,void *,void *);
extern void dtlb_fault (VCPU *vcpu, u64 vadr);